Last time we walked through the startPreview flow, mainly to draw a rough top-to-bottom line through the stack. Today we look at sendCommand and dataCallback in the Camera framework. These are the glue between the layers, and they show how the upper and lower layers actually talk to each other.
First, sendCommand. There is no sendCommand method in Camera.java; the sendCommand function lives in Camera.cpp, and it is first invoked from the JNI layer in android_hardware_Camera.cpp:
android_hardware_Camera.cpp (frameworks\base\core\jni)
Java native method -> JNI function -> command constant:

- startSmoothZoom -> android_hardware_Camera_startSmoothZoom -> CAMERA_CMD_START_SMOOTH_ZOOM
- stopSmoothZoom -> android_hardware_Camera_stopSmoothZoom -> CAMERA_CMD_STOP_SMOOTH_ZOOM
- setDisplayOrientation -> android_hardware_Camera_setDisplayOrientation -> CAMERA_CMD_SET_DISPLAY_ORIENTATION
- _enableShutterSound -> android_hardware_Camera_enableShutterSound -> CAMERA_CMD_ENABLE_SHUTTER_SOUND
- _startFaceDetection -> android_hardware_Camera_startFaceDetection -> CAMERA_CMD_START_FACE_DETECTION
- _stopFaceDetection -> android_hardware_Camera_stopFaceDetection -> CAMERA_CMD_STOP_FACE_DETECTION
- enableFocusMoveCallback -> android_hardware_Camera_enableFocusMoveCallback -> CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG
Command types like these are defined in camera.h (system\core\include\system):

```cpp
enum {
    CAMERA_CMD_START_SMOOTH_ZOOM = 1,
    CAMERA_CMD_STOP_SMOOTH_ZOOM = 2,
    CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3,
    CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4,
    CAMERA_CMD_PLAY_RECORDING_SOUND = 5,
    CAMERA_CMD_START_FACE_DETECTION = 6,
    CAMERA_CMD_STOP_FACE_DETECTION = 7,
    CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG = 8,
    CAMERA_CMD_PING = 9,
    CAMERA_CMD_SET_VIDEO_BUFFER_COUNT = 10,
};
```
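For orientation, here is a minimal sketch (hypothetical app code, assuming a back camera at index 0 exists) of the public Camera APIs that funnel into these commands:

```java
import android.hardware.Camera;

public class SendCommandDemo {
    // Each of these framework calls ends up in Camera::sendCommand()
    // with one of the CAMERA_CMD_* constants listed above.
    static void demo() {
        Camera camera = Camera.open(0);          // assumes a back camera exists
        camera.setDisplayOrientation(90);        // CAMERA_CMD_SET_DISPLAY_ORIENTATION
        camera.enableShutterSound(false);        // CAMERA_CMD_ENABLE_SHUTTER_SOUND (API 17+)
        if (camera.getParameters().isSmoothZoomSupported()) {
            camera.startSmoothZoom(5);           // CAMERA_CMD_START_SMOOTH_ZOOM
        }
        camera.release();
    }
}
```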
All of the operations above go through the sendCommand() path, with the command types listed; the HAL inspects the command value and reacts accordingly. Let's look at the implementation of sendCommand: Camera.cpp (frameworks\av\camera)
```cpp
status_t Camera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
    ALOGV("sendCommand");
    sp<ICamera> c = mCamera;
    if (c == 0) return NO_INIT;
    return c->sendCommand(cmd, arg1, arg2);
}
```
The call then crosses the process boundary via Binder. On the proxy side (BpCamera, in ICamera.cpp):
```cpp
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
    ALOGV("sendCommand");
    Parcel data, reply;
    data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
    data.writeInt32(cmd);
    data.writeInt32(arg1);
    data.writeInt32(arg2);
    remote()->transact(SEND_COMMAND, data, &reply);
    return reply.readInt32();
}
```
On the server side, BnCamera::onTransact() unmarshals the arguments and dispatches to the real implementation:

```cpp
status_t BnCamera::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        ......
        case SEND_COMMAND: {
            ALOGV("SEND_COMMAND");
            CHECK_INTERFACE(ICamera, data, reply);
            int command = data.readInt32();
            int arg1 = data.readInt32();
            int arg2 = data.readInt32();
            reply->writeInt32(sendCommand(command, arg1, arg2));
            return NO_ERROR;
        } break;
        ......
    }
}
```
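As an aside, the same proxy/stub pattern is available from Java through android.os.Binder. Here is a hypothetical, simplified Java pair mirroring the BpCamera/BnCamera round trip above (the descriptor and class names are made up for illustration):

```java
import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

// Stub side, analogous to BnCamera::onTransact.
class CommandStub extends Binder {
    static final String DESCRIPTOR = "demo.ICommand";              // hypothetical
    static final int SEND_COMMAND = IBinder.FIRST_CALL_TRANSACTION;

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == SEND_COMMAND) {
            data.enforceInterface(DESCRIPTOR);   // like CHECK_INTERFACE
            int cmd  = data.readInt();
            int arg1 = data.readInt();
            int arg2 = data.readInt();
            reply.writeInt(handleCommand(cmd, arg1, arg2));
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }

    int handleCommand(int cmd, int arg1, int arg2) {
        return 0;   // dispatch on cmd, the way CameraClient::sendCommand does
    }
}

// Proxy side, analogous to BpCamera::sendCommand.
class CommandProxy {
    static int sendCommand(IBinder remote, int cmd, int arg1, int arg2)
            throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken(CommandStub.DESCRIPTOR);
            data.writeInt(cmd);
            data.writeInt(arg1);
            data.writeInt(arg2);
            remote.transact(CommandStub.SEND_COMMAND, data, reply, 0);
            return reply.readInt();
        } finally {
            reply.recycle();
            data.recycle();
        }
    }
}
```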
The Bn side's sendCommand() call lands in CameraService's per-client object, CameraClient::sendCommand() (CameraClient.cpp, frameworks\av\services\camera\libcameraservice\api1):
```cpp
status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
    ......
    if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
        // map the requested degrees to a transform, taking the front-facing
        // mirror into account, then apply it to the preview window
        orientation = getOrientation(arg1, mCameraFacing == CAMERA_FACING_FRONT);
        if (orientation == -1) return BAD_VALUE;

        if (mOrientation != orientation) {
            mOrientation = orientation;
            if (mPreviewWindow != 0) {
                native_window_set_buffers_transform(mPreviewWindow.get(), mOrientation);
            }
        }
        return OK;
    } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
        switch (arg1) {
            case 0: return enableShutterSound(false);
            case 1: return enableShutterSound(true);
            default: return BAD_VALUE;
        }
        return OK;
    } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
        mCameraService->playSound(CameraService::SOUND_RECORDING);
    } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
        // not supported on this path
        return INVALID_OPERATION;
    } else if (cmd == CAMERA_CMD_PING) {
        return OK;
    }

    // everything else (face detection, smooth zoom, ...) goes down to the HAL
    return mHardware->sendCommand(cmd, arg1, arg2);
}
```
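For CAMERA_CMD_SET_DISPLAY_ORIENTATION in particular, the degrees an app should pass depend on both the sensor's mounting orientation and the current display rotation. The canonical computation, adapted from the Camera.setDisplayOrientation() documentation:

```java
import android.app.Activity;
import android.hardware.Camera;
import android.view.Surface;

public class OrientationUtil {
    // Combine the sensor's mounting orientation with the current UI rotation,
    // mirroring the result for front-facing cameras, per the SDK docs.
    public static void setCameraDisplayOrientation(Activity activity,
            int cameraId, Camera camera) {
        Camera.CameraInfo info = new Camera.CameraInfo();
        Camera.getCameraInfo(cameraId, info);
        int rotation = activity.getWindowManager().getDefaultDisplay().getRotation();
        int degrees = 0;
        switch (rotation) {
            case Surface.ROTATION_0:   degrees = 0;   break;
            case Surface.ROTATION_90:  degrees = 90;  break;
            case Surface.ROTATION_180: degrees = 180; break;
            case Surface.ROTATION_270: degrees = 270; break;
        }

        int result;
        if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
            result = (info.orientation + degrees) % 360;
            result = (360 - result) % 360;   // compensate for the mirror
        } else {   // back-facing
            result = (info.orientation - degrees + 360) % 360;
        }
        camera.setDisplayOrientation(result);   // -> CAMERA_CMD_SET_DISPLAY_ORIENTATION
    }
}
```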
On the HAL side, the only commands handled here are starting and stopping face detection; everything else falls through as a no-op. QCamera2HWI.cpp (device\asus\flo\camera\qcamera2\hal):
```cpp
int QCamera2HardwareInterface::sendCommand(int32_t command, int32_t /*arg1*/, int32_t /*arg2*/)
{
    int rc = NO_ERROR;
    switch (command) {
    case CAMERA_CMD_START_FACE_DETECTION:
    case CAMERA_CMD_STOP_FACE_DETECTION:
        rc = setFaceDetection(command == CAMERA_CMD_START_FACE_DETECTION ? true : false);
        break;
    default:
        rc = NO_ERROR;
        break;
    }
    return rc;
}
```
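From the app's point of view, this HAL path is driven by the public face detection API. A minimal sketch (hypothetical app code, assuming the device reports face detection support):

```java
import android.hardware.Camera;

public class FaceDetectDemo {
    static void enableFaceDetection(Camera camera) {
        camera.setFaceDetectionListener(new Camera.FaceDetectionListener() {
            @Override
            public void onFaceDetection(Camera.Face[] faces, Camera c) {
                // results come back via CAMERA_MSG_PREVIEW_METADATA
                // (see the callback path in the second half of this post)
            }
        });
        // startFaceDetection() is only valid while preview is running
        camera.startPreview();
        if (camera.getParameters().getMaxNumDetectedFaces() > 0) {
            camera.startFaceDetection();   // -> CAMERA_CMD_START_FACE_DETECTION
        }
    }
}
```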
And that is the whole sendCommand path: Java API -> JNI -> Camera.cpp -> Binder -> CameraClient -> HAL.
Next, the callbacks. As the earlier posts showed, there are three callback channels: notifyCallback, dataCallback, and dataTimestampCallback.
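Roughly speaking, the three channels carry different Java-level callbacks. Here is a sketch of which public API rides which channel (hypothetical registration code):

```java
import android.hardware.Camera;

public class CallbackChannels {
    static void register(Camera camera) {
        // notifyCallback: pure int32 events (errors, focus, zoom, shutter)
        camera.setErrorCallback(new Camera.ErrorCallback() {
            @Override public void onError(int error, Camera c) { /* CAMERA_MSG_ERROR */ }
        });
        camera.autoFocus(new Camera.AutoFocusCallback() {
            @Override public void onAutoFocus(boolean ok, Camera c) { /* CAMERA_MSG_FOCUS */ }
        });

        // dataCallback: frames and images carried across Binder in IMemory
        camera.setPreviewCallback(new Camera.PreviewCallback() {
            @Override public void onPreviewFrame(byte[] data, Camera c) { /* CAMERA_MSG_PREVIEW_FRAME */ }
        });

        // dataTimestampCallback: timestamped recording frames; apps normally
        // never see these directly -- MediaRecorder consumes them.
    }
}
```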
This flow cannot be traced top-down the way we did before; we have to walk it in reverse, starting from where the HAL calls back into CameraHardwareInterface.h (frameworks\av\services\camera\libcameraservice\device1):
```cpp
static void __notify_cb(int32_t msg_type, int32_t ext1,
                        int32_t ext2, void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    __this->mNotifyCb(msg_type, ext1, ext2, __this->mCbUser);
}

static void __data_cb(int32_t msg_type,
                      const camera_memory_t *data, unsigned int index,
                      camera_frame_metadata_t *metadata,
                      void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    ......  // (elided) recover the heap wrapper `mem` behind `data` and range-check index
    __this->mDataCb(msg_type, mem->mBuffers[index], metadata, __this->mCbUser);
}

static void __data_cb_timestamp(nsecs_t timestamp, int32_t msg_type,
                                const camera_memory_t *data, unsigned index,
                                void *user)
{
    ALOGV("%s", __FUNCTION__);
    CameraHardwareInterface *__this =
            static_cast<CameraHardwareInterface *>(user);
    ......  // (elided) same recovery of `mem` from `data`
    __this->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], __this->mCbUser);
}
```
The mNotifyCb, mDataCb and mDataCbTimestamp members used above are filled in by CameraHardwareInterface::setCallbacks(), which is invoked from CameraClient::initialize():
```cpp
void setCallbacks(notify_callback notify_cb,
                  data_callback data_cb,
                  data_callback_timestamp data_cb_timestamp,
                  void* user)
{
    mNotifyCb = notify_cb;
    mDataCb = data_cb;
    mDataCbTimestamp = data_cb_timestamp;
    mCbUser = user;

    ALOGV("%s(%s)", __FUNCTION__, mName.string());

    if (mDevice->ops->set_callbacks) {
        mDevice->ops->set_callbacks(mDevice,
                                    __notify_cb,
                                    __data_cb,
                                    __data_cb_timestamp,
                                    __get_memory,
                                    this);
    }
}
```
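The shape of this registration is the classic "static trampoline plus opaque cookie" pattern: the HAL only understands C function pointers and a void* user cookie, so CameraHardwareInterface registers static functions and passes this as the cookie, and each trampoline casts the cookie back before forwarding. A hypothetical Java rendering of the same idea:

```java
// Hypothetical illustration of the trampoline-and-cookie pattern above.
public class HardwareShim {
    public interface DataCallback {
        void onData(int msgType, byte[] data);
    }

    private final DataCallback mDataCb;   // like CameraHardwareInterface::mDataCb

    public HardwareShim(DataCallback cb) {
        mDataCb = cb;
    }

    // Plays the role of __data_cb: a static entry point the lower layer calls
    // with an opaque cookie, from which the instance is recovered.
    public static void dataTrampoline(int msgType, byte[] data, Object user) {
        HardwareShim self = (HardwareShim) user;   // static_cast<CameraHardwareInterface*>(user)
        self.mDataCb.onData(msgType, data);
    }
}
```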
Since CameraClient registered its own static functions there, the callbacks naturally land back in CameraClient:
```cpp
void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
        int32_t ext2, void* user) {
    ......
}

void CameraClient::dataCallback(int32_t msgType,
        const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
    LOG2("dataCallback(%d)", msgType);

    Mutex* lock = getClientLockFromCookie(user);
    if (lock == NULL) return;
    Mutex::Autolock alock(*lock);

    CameraClient* client =
            static_cast<CameraClient*>(getClientFromCookie(user));
    if (client == NULL) return;
    if (!client->lockIfMessageWanted(msgType)) return;

    if (dataPtr == 0 && metadata == NULL) {
        ALOGE("Null data returned in data callback");
        client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
        return;
    }

    switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
        case CAMERA_MSG_PREVIEW_FRAME:
            client->handlePreviewData(msgType, dataPtr, metadata);
            break;
        case CAMERA_MSG_POSTVIEW_FRAME:
            client->handlePostview(dataPtr);
            break;
        case CAMERA_MSG_RAW_IMAGE:
            client->handleRawPicture(dataPtr);
            break;
        case CAMERA_MSG_COMPRESSED_IMAGE:
            client->handleCompressedPicture(dataPtr);
            break;
        default:
            client->handleGenericData(msgType, dataPtr, metadata);
            break;
    }
}

void CameraClient::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
        const sp<IMemory>& dataPtr, void* user) {
    ......
}
```
Let's follow the dataCallback path. The interesting part is the dispatch on the message type:
```cpp
switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
    case CAMERA_MSG_PREVIEW_FRAME:
        client->handlePreviewData(msgType, dataPtr, metadata);
        break;
    case CAMERA_MSG_POSTVIEW_FRAME:
        client->handlePostview(dataPtr);
        break;
    case CAMERA_MSG_RAW_IMAGE:
        client->handleRawPicture(dataPtr);
        break;
    case CAMERA_MSG_COMPRESSED_IMAGE:
        client->handleCompressedPicture(dataPtr);
        break;
    default:
        client->handleGenericData(msgType, dataPtr, metadata);
        break;
}
```
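The mask here matters because msgType is a bit field: when face data accompanies a preview frame, the HAL ORs CAMERA_MSG_PREVIEW_METADATA into the frame's message type, and the switch must strip that bit before matching. A small runnable sketch, using the constant values from camera.h (system\core\include\system):

```java
public class MsgTypeDemo {
    // Values from camera.h (system\core\include\system)
    static final int CAMERA_MSG_PREVIEW_FRAME    = 0x0010;
    static final int CAMERA_MSG_PREVIEW_METADATA = 0x0400;

    public static void main(String[] args) {
        // A preview frame that also carries face metadata:
        int msgType = CAMERA_MSG_PREVIEW_FRAME | CAMERA_MSG_PREVIEW_METADATA; // 0x410

        // Strip the metadata bit before dispatching on the frame type:
        int dataMsgType = msgType & ~CAMERA_MSG_PREVIEW_METADATA;             // 0x010

        System.out.println(dataMsgType == CAMERA_MSG_PREVIEW_FRAME);          // true
    }
}
```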
Back in CameraClient, each of these handlers (handlePreviewData() and the rest) eventually calls c->dataCallback() on the remote ICameraClient according to the message type, so the data crosses Binder again, this time from the CameraService process back to the application process. Note the FLAG_ONEWAY: the transaction is asynchronous, so the service never blocks waiting on the app:
```cpp
void dataCallback(int32_t msgType, const sp<IMemory>& imageData,
                  camera_frame_metadata_t *metadata)
{
    ALOGV("dataCallback");
    Parcel data, reply;
    data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
    data.writeInt32(msgType);
    data.writeStrongBinder(imageData->asBinder());
    if (metadata) {
        data.writeInt32(metadata->number_of_faces);
        data.write(metadata->faces, sizeof(camera_face_t) * metadata->number_of_faces);
    }
    remote()->transact(DATA_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
}
```
On the receiving side, BnCameraClient::onTransact() rebuilds the IMemory and the face metadata from the Parcel:

```cpp
status_t BnCameraClient::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        ......
        case DATA_CALLBACK: {
            ALOGV("DATA_CALLBACK");
            CHECK_INTERFACE(ICameraClient, data, reply);
            int32_t msgType = data.readInt32();
            sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
            camera_frame_metadata_t *metadata = NULL;
            if (data.dataAvail() > 0) {
                metadata = new camera_frame_metadata_t;
                metadata->number_of_faces = data.readInt32();
                metadata->faces = (camera_face_t *) data.readInplace(
                        sizeof(camera_face_t) * metadata->number_of_faces);
            }
            dataCallback(msgType, imageData, metadata);
            if (metadata) delete metadata;
            return NO_ERROR;
        } break;
        ......
    }
}
```
On the application side, BnCameraClient is implemented by the Camera class itself, so this arrives in Camera.cpp:
```cpp
void Camera::dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
                          camera_frame_metadata_t *metadata)
{
    sp<CameraListener> listener;
    {
        Mutex::Autolock _l(mLock);
        listener = mListener;
    }
    if (listener != NULL) {
        listener->postData(msgType, dataPtr, metadata);
    }
}
```
The data is handed upward through a listener. So the question becomes: when was this listener set? Recall this snippet from the native_setup() flow covered in the first framework post:
```cpp
sp<JNICameraContext> context = new JNICameraContext(env, weak_this, clazz, camera);
context->incStrong((void*)android_hardware_Camera_native_setup);
camera->setListener(context);
```
That is where the listener is set: JNICameraContext derives from CameraListener and overrides its pure virtual methods.
```cpp
class JNICameraContext : public CameraListener {
    ......
};

class CameraListener : virtual public RefBase {
public:
    virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
    virtual void postData(int32_t msgType, const sp<IMemory>& dataPtr,
                          camera_frame_metadata_t *metadata) = 0;
    virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType,
                                   const sp<IMemory>& dataPtr) = 0;
};
```
Continuing from the listener->postData() call above, we will only follow postData() here. android_hardware_Camera.cpp (frameworks\base\core\jni):
```cpp
void JNICameraContext::postData(int32_t msgType, const sp<IMemory>& dataPtr,
                                camera_frame_metadata_t *metadata)
{
    ......
    // strip the metadata bit; only the frame-type part drives the switch
    int32_t dataMsgType = msgType & ~CAMERA_MSG_PREVIEW_METADATA;

    switch (dataMsgType) {
        case CAMERA_MSG_VIDEO_FRAME:
            break;

        case CAMERA_MSG_RAW_IMAGE:
            ALOGV("rawCallback");
            if (mRawImageCallbackBuffers.isEmpty()) {
                // no app-supplied raw buffer: post the event with a null payload
                env->CallStaticVoidMethod(mCameraJClass, fields.post_event,
                        mCameraJObjectWeak, dataMsgType, 0, 0, NULL);
            } else {
                copyAndPost(env, dataPtr, dataMsgType);
            }
            break;

        case 0:
            break;

        default:
            ALOGV("dataCallback(%d, %p)", dataMsgType, dataPtr.get());
            copyAndPost(env, dataPtr, dataMsgType);
            break;
    }

    // face metadata piggybacks on the same message; post it separately
    if (metadata && (msgType & CAMERA_MSG_PREVIEW_METADATA)) {
        postMetadata(env, CAMERA_MSG_PREVIEW_METADATA, metadata);
    }
}
```
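The CAMERA_MSG_RAW_IMAGE branch above explains behavior apps sometimes hit: unless raw callback buffers were registered in advance (through a hidden Camera API), the raw PictureCallback fires with null data. A hypothetical takePicture() call showing both image callbacks:

```java
import android.hardware.Camera;

public class TakePictureDemo {
    static void capture(Camera camera) {
        camera.takePicture(
            new Camera.ShutterCallback() {
                @Override public void onShutter() {
                    // CAMERA_MSG_SHUTTER, via the notify channel
                }
            },
            new Camera.PictureCallback() {
                @Override public void onPictureTaken(byte[] data, Camera c) {
                    // CAMERA_MSG_RAW_IMAGE; data is null when no raw
                    // callback buffers were registered (the isEmpty() branch)
                }
            },
            new Camera.PictureCallback() {
                @Override public void onPictureTaken(byte[] data, Camera c) {
                    // CAMERA_MSG_COMPRESSED_IMAGE; data is the JPEG
                }
            });
    }
}
```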
Back to postData(): the three calls env->CallStaticVoidMethod(mCameraJClass, fields.post_event, mCameraJObjectWeak, dataMsgType, 0, 0, NULL), copyAndPost(env, dataPtr, dataMsgType) and postMetadata(env, CAMERA_MSG_PREVIEW_METADATA, metadata) all use the cached fields.post_event method ID, directly or indirectly. That ID is looked up in register_android_hardware_Camera():

fields.post_event = env->GetStaticMethodID(clazz, "postEventFromNative", "(Ljava/lang/Object;IIILjava/lang/Object;)V");

We won't dwell on the registration itself; the point is that every one of these paths ends up invoking postEventFromNative() in Camera.java (frameworks\base\core\java\android\hardware):
```java
private static void postEventFromNative(Object camera_ref,
                                        int what, int arg1, int arg2, Object obj)
{
    Camera c = (Camera)((WeakReference)camera_ref).get();
    if (c == null)
        return;

    if (c.mEventHandler != null) {
        Message m = c.mEventHandler.obtainMessage(what, arg1, arg2, obj);
        c.mEventHandler.sendMessage(m);
    }
}
```
This is the classic Java Handler/Message pattern: the JNI callback thread posts a Message, and EventHandler delivers it on the Looper thread the Camera was opened on:
```java
private class EventHandler extends Handler
{
    private final Camera mCamera;

    public EventHandler(Camera c, Looper looper) {
        super(looper);
        mCamera = c;
    }

    @Override
    public void handleMessage(Message msg) {
        switch (msg.what) {
        case CAMERA_MSG_SHUTTER:
            if (mShutterCallback != null) {
                mShutterCallback.onShutter();
            }
            return;

        case CAMERA_MSG_RAW_IMAGE:
            if (mRawImageCallback != null) {
                mRawImageCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_COMPRESSED_IMAGE:
            if (mJpegCallback != null) {
                mJpegCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_PREVIEW_FRAME:
            PreviewCallback pCb = mPreviewCallback;
            if (pCb != null) {
                if (mOneShot) {
                    // one-shot callback: clear it before delivery
                    mPreviewCallback = null;
                } else if (!mWithBuffer) {
                    // continuous mode without buffers: re-arm for the next frame
                    setHasPreviewCallback(true, false);
                }
                pCb.onPreviewFrame((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_POSTVIEW_FRAME:
            if (mPostviewCallback != null) {
                mPostviewCallback.onPictureTaken((byte[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_FOCUS:
            AutoFocusCallback cb = null;
            synchronized (mAutoFocusCallbackLock) {
                cb = mAutoFocusCallback;
            }
            if (cb != null) {
                boolean success = msg.arg1 == 0 ? false : true;
                cb.onAutoFocus(success, mCamera);
            }
            return;

        case CAMERA_MSG_ZOOM:
            if (mZoomListener != null) {
                mZoomListener.onZoomChange(msg.arg1, msg.arg2 != 0, mCamera);
            }
            return;

        case CAMERA_MSG_PREVIEW_METADATA:
            if (mFaceListener != null) {
                mFaceListener.onFaceDetection((Face[])msg.obj, mCamera);
            }
            return;

        case CAMERA_MSG_ERROR:
            Log.e(TAG, "Error " + msg.arg1);
            if (mErrorCallback != null) {
                mErrorCallback.onError(msg.arg1, mCamera);
            }
            return;

        case CAMERA_MSG_FOCUS_MOVE:
            if (mAutoFocusMoveCallback != null) {
                mAutoFocusMoveCallback.onAutoFocusMoving(msg.arg1 == 0 ? false : true, mCamera);
            }
            return;

        default:
            Log.e(TAG, "Unknown message type " + msg.what);
            return;
        }
    }
}
```
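The CAMERA_MSG_PREVIEW_FRAME case shows how mOneShot and mWithBuffer change delivery. The three public preview-callback modes map onto them as follows (sketch; bufferSize must match the preview size and format):

```java
import android.hardware.Camera;

public class PreviewModes {
    static void demo(Camera camera, Camera.PreviewCallback cb, int bufferSize) {
        // mOneShot == true: delivered once, then mPreviewCallback is cleared
        camera.setOneShotPreviewCallback(cb);

        // mOneShot == false, mWithBuffer == false: the handler re-arms the
        // native callback after every frame (the setHasPreviewCallback call)
        camera.setPreviewCallback(cb);

        // mWithBuffer == true: frames arrive only while app-supplied buffers
        // are queued; return each buffer with addCallbackBuffer when done
        camera.setPreviewCallbackWithBuffer(cb);
        camera.addCallbackBuffer(new byte[bufferSize]);
    }
}
```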
From here on we are in the upper layers: these callbacks are registered by the camera application, and each kind of data gets its corresponding handling there.
We won't analyze how each individual payload is processed; that can be covered in more detail later if needed. The goal here was to make clear how the code flows.
The code in this post is from stock Android 5.1 sources. Comments and discussion are welcome.